library(psych)
library(ggplot2)
library(tidyverse)
library(effects)
## Library for the multiple comparisons
library(phia)
## Library to compute the effect sizes
library(sjstats)
library(lsr)
library(lavaan)
library(ggthemes)
library(tibble)
library(car)
library(olsrr)
library(pwr)
library(car)

library(R2jags)
library(rjags)
library(runjags)
library("lattice")
library("superdiag")
library(gtools)

library(bayestestR) 
library("ggmcmc")
library(brms)
library(BayesFactor)
library(MCMCvis)
library(readr)
## Load the cleaned data, then the project helpers (apply.mean, as.report, ...).
attobo2 <- read_csv("attobo2_clean.csv")

source('Useful Code V2.R', echo=FALSE)

## Recode the design variables and rescale expectancy onto (0, 1) for the
## beta regression (exact 0/1 are outside the beta support, so the endpoints
## are nudged to .001/.999).
attobo2 <- attobo2 %>%
  mutate(
    scenario    = as.factor(scenario),
    valence     = as.factor(valence),
    attribution = as.factor(attribution),
    expectancy  = (expectancy - 1) / 10,
    expectancy  = case_when(
      expectancy == 1 ~ .999,
      expectancy == 0 ~ .001,
      TRUE            ~ expectancy
    ),
    ## Releveled copies so each attribution can serve as the reference group.
    attribution_effort = factor(attribution, levels = c("Effort", "Strategy", "Aptitude")),
    attribution_strat  = factor(attribution, levels = c("Strategy", "Effort", "Aptitude")),
    ## Redo advice split by whether it came with methodological feedback.
    redo.feedback   = ifelse(redo == 1 & method == 1, 1, 0),
    redo.nofeedback = ifelse(redo == 1 & method == 0, 1, 0)
  )

## Failure-condition subset.
attobo2.fail <- attobo2 %>% filter(valence == "Fail")

IRR (inter-rater reliability: agreement between the two coders' codings)

## Proportion agreement between the two codings of each advice category.
sum(attobo2$method_den == attobo2$method_val)/length(attobo2$method_val)
## [1] 0.9246575
## FIX: the original compared bruteforce_den and redo_den against method_val
## (copy-paste error); each category must be checked against its own
## validation coding. The printed values below were produced by the original
## (buggy) comparisons and should be regenerated.
sum(attobo2$bruteforce_den == attobo2$bruteforce_val)/length(attobo2$bruteforce_val)
## [1] 0.7431507
sum(attobo2$redo_den == attobo2$redo_val)/length(attobo2$redo_val)
## [1] 0.8219178

Descriptive Stats

## Sample breakdown by attribution condition.
attobo2 %>%
  group_by(attribution) %>%
  summarise(n())

## Cell counts for each advice DV, by attribution x valence.

## method
attobo2 %>%
  group_by(attribution, valence) %>%
  filter(method == 1) %>%
  summarise(n())
## bruteforce
attobo2 %>%
  group_by(attribution, valence) %>%
  filter(bruteforce == 1) %>%
  summarise(n())
## redo
attobo2 %>%
  group_by(attribution, valence) %>%
  filter(redo == 1) %>%
  summarise(n())
## threat
attobo2 %>%
  group_by(attribution, valence) %>%
  filter(threat == 1) %>%
  summarise(n())
## tangible rewards
attobo2 %>%
  group_by(attribution, valence) %>%
  filter(tangiblerewards == 1) %>%
  summarise(n())

## Zooming into redo advice.
redo <- attobo2 %>% filter(redo == 1)

attobo2 %>%
  group_by(redo, scenario) %>%
  summarise(n())
## redo with methodological feedback
attobo2 %>%
  group_by(redo, scenario) %>%
  filter(method == 1) %>%
  summarise(n())
## Share of redo advice that also carried method feedback, per scenario.
attobo2 %>%
  group_by(scenario) %>%
  summarize(method = sum(method == 1 & redo == 1) / sum(redo == 1))

## Expectancy by scenario.
attobo2 %>%
  group_by(scenario) %>%
  summarise(meanexpectancy = mean(expectancy, na.rm = TRUE),
            sdexpectancy = sd(expectancy, na.rm = TRUE))

## Same, restricted to participants who did not advise switching.
noswitch <- attobo2 %>% filter(switch == 0)

noswitch %>%
  group_by(scenario) %>%
  count()

noswitch %>%
  group_by(scenario) %>%
  summarise(meanexpectancy = mean(expectancy, na.rm = TRUE),
            sdexpectancy = sd(expectancy, na.rm = TRUE))
####Demographics####

attobo2 %>%
  group_by(gender) %>%
  count()
## NOTE: 164/292 hard-codes the male count over the sample size taken from
## the output above — update both if the data change.
164/292 #males
## [1] 0.5616438
mean(attobo2$age)
## [1] 37.83219
sd(attobo2$age)
## [1] 11.51852

## Column subsets for the distribution plots below.
attobo2.advice <- attobo2 %>%
  select(method, bruteforce, threat, tangiblerewards,
         switch, expectancy)

attobo2.attribution <- attobo2 %>%
  select(aptout, aptvar, aptuncon, 
         effortout, effortvar, effortuncon,
         stratout, stratvar, stratuncon,
         luckout, luckvar, luckuncon,
         taskout, taskvar, taskuncon,
         moodout, moodvar, mooduncon)

attobo2.others <- attobo2 %>%
  select(gender, age, education, sms,
         smo, so, race)

## Bar chart of every column in `df`, one facet per column (facets keep
## column order via factor_key = TRUE). gather() is superseded by
## pivot_longer(), but is kept here for its silent cross-type coercion.
## Factored out of three identical copy-pasted pipelines.
plot_distributions <- function(df) {
  ggplot(data = gather(df, factor_key = TRUE), aes(x = factor(value))) +
    geom_bar() +
    facet_wrap(~ key, scales = "free", as.table = TRUE, nrow = 3) +
    xlab("") +
    theme_bw()
}

plot_distributions(attobo2.advice)

plot_distributions(attobo2.attribution)

plot_distributions(attobo2.others)

Power Analysis

## Minimum N for a chi-square test of the 3-level attribution effect:
## w = .3 is Cohen's conventional "medium" effect; df = 2 for 3 groups.
pwr.chisq.test(w = .3, df = 2, sig.level =.05, power = .8)
## 
##      Chi squared power calculation 
## 
##               w = 0.3
##               N = 107.0521
##              df = 2
##       sig.level = 0.05
##           power = 0.8
## 
## NOTE: N is the number of observations

Frequentist Estimate

library(glm2)
library(betareg)
library(multcomp)

## --- Frequentist models: advice ~ attribution x valence ---------------
## glm2() refits with a more stable iteratively-reweighted least squares
## than glm(). The enormous interaction SEs (~1500) below indicate complete
## separation in the Success cells (no advice of that type given there).
## Method advice (Strategy as reference level).
summary(glm2(method ~ attribution_strat*valence, data = attobo2, family = binomial(link = "logit")))
## 
## Call:
## glm2(formula = method ~ attribution_strat * valence, family = binomial(link = "logit"), 
##     data = attobo2)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -1.12363  -0.45428  -0.41269  -0.00008   2.23854  
## 
## Coefficients:
##                                           Estimate Std. Error z value Pr(>|z|)
## (Intercept)                                -0.1278     0.2923  -0.437 0.661896
## attribution_stratEffort                    -2.0914     0.5543  -3.773 0.000161
## attribution_stratAptitude                  -1.9516     0.5572  -3.503 0.000461
## valenceSuccess                             -2.2925     0.5981  -3.833 0.000126
## attribution_stratEffort:valenceSuccess    -15.0543  1491.3135  -0.010 0.991946
## attribution_stratAptitude:valenceSuccess  -15.1941  1552.2083  -0.010 0.992190
##                                             
## (Intercept)                                 
## attribution_stratEffort                  ***
## attribution_stratAptitude                ***
## valenceSuccess                           ***
## attribution_stratEffort:valenceSuccess      
## attribution_stratAptitude:valenceSuccess    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 218.08  on 291  degrees of freedom
## Residual deviance: 156.78  on 286  degrees of freedom
## AIC: 168.78
## 
## Number of Fisher Scoring iterations: 18
## Brute-force advice (Effort as reference level).
summary(glm2(bruteforce ~ attribution_effort*valence, data = attobo2, family = binomial(link = "logit")))
## 
## Call:
## glm2(formula = bruteforce ~ attribution_effort * valence, family = binomial(link = "logit"), 
##     data = attobo2)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -0.69706  -0.37146  -0.28007  -0.00008   2.55268  
## 
## Coefficients:
##                                            Estimate Std. Error z value Pr(>|z|)
## (Intercept)                                 -1.2910     0.3405  -3.792 0.000149
## attribution_effortStrategy                  -1.3946     0.6870  -2.030 0.042359
## attribution_effortAptitude                  -1.3481     0.6878  -1.960 0.049994
## valenceSuccess                              -1.9279     0.7974  -2.418 0.015623
## attribution_effortStrategy:valenceSuccess  -14.9526  1536.2879  -0.010 0.992234
## attribution_effortAptitude:valenceSuccess  -14.9991  1552.2084  -0.010 0.992290
##                                              
## (Intercept)                               ***
## attribution_effortStrategy                *  
## attribution_effortAptitude                *  
## valenceSuccess                            *  
## attribution_effortStrategy:valenceSuccess    
## attribution_effortAptitude:valenceSuccess    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 140.56  on 291  degrees of freedom
## Residual deviance: 114.49  on 286  degrees of freedom
## AIC: 126.49
## 
## Number of Fisher Scoring iterations: 18
## Redo advice.
summary(glm2(redo ~ attribution_effort*valence, data = attobo2, family = binomial(link = "logit")))
## 
## Call:
## glm2(formula = redo ~ attribution_effort * valence, family = binomial(link = "logit"), 
##     data = attobo2)
## 
## Deviance Residuals: 
##      Min        1Q    Median        3Q       Max  
## -1.03017  -0.43149  -0.00005  -0.00005   2.20017  
## 
## Coefficients:
##                                            Estimate Std. Error z value Pr(>|z|)
## (Intercept)                                 -0.3567     0.2845  -1.254 0.209989
## attribution_effortStrategy                  -0.8289     0.4468  -1.855 0.063559
## attribution_effortAptitude                  -1.9706     0.5961  -3.306 0.000947
## valenceSuccess                             -20.2094  2458.7599  -0.008 0.993442
## attribution_effortStrategy:valenceSuccess    0.8289  3530.0331   0.000 0.999813
## attribution_effortAptitude:valenceSuccess    1.9706  3548.9143   0.001 0.999557
##                                              
## (Intercept)                                  
## attribution_effortStrategy                .  
## attribution_effortAptitude                ***
## valenceSuccess                               
## attribution_effortStrategy:valenceSuccess    
## attribution_effortAptitude:valenceSuccess    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 218.08  on 291  degrees of freedom
## Residual deviance: 147.25  on 286  degrees of freedom
## AIC: 159.25
## 
## Number of Fisher Scoring iterations: 19
## Expectancy is on (0, 1) after the rescaling above, so beta regression.
summary(betareg(expectancy ~ attribution*valence, data = attobo2, link = "logit" ))
## 
## Call:
## betareg(formula = expectancy ~ attribution * valence, data = attobo2, 
##     link = "logit")
## 
## Standardized weighted residuals 2:
##     Min      1Q  Median      3Q     Max 
## -6.1414 -0.6306 -0.1762  0.4224  5.9072 
## 
## Coefficients (mean model with logit link):
##                                    Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                         -0.3546     0.1255  -2.826  0.00472 ** 
## attributionEffort                    0.9297     0.1739   5.347 8.94e-08 ***
## attributionStrategy                  0.8195     0.1765   4.643 3.44e-06 ***
## valenceSuccess                       1.9730     0.1895  10.412  < 2e-16 ***
## attributionEffort:valenceSuccess    -0.5824     0.2584  -2.254  0.02419 *  
## attributionStrategy:valenceSuccess  -0.6846     0.2612  -2.621  0.00877 ** 
## 
## Phi coefficients (precision model with identity link):
##       Estimate Std. Error z value Pr(>|z|)    
## (phi)   4.6122     0.3732   12.36   <2e-16 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 
## 
## Type of estimator: ML (maximum likelihood)
## Log-likelihood: 180.1 on 7 Df
## Pseudo R-squared: 0.3291
## Number of iterations: 21 (BFGS) + 2 (Fisher scoring)
## car::Anova omnibus tests for each model (Type II by default).
Anova(glm2(method ~ attribution_strat*valence, data = attobo2, family = binomial(link = "logit")))
Anova(glm2(bruteforce ~ attribution_effort*valence, data = attobo2, family = binomial(link = "logit")))
Anova(glm2(redo ~ attribution_effort*valence, data = attobo2, family = binomial(link = "logit")))
Anova(betareg(expectancy ~ attribution*valence, data = attobo2, link = "logit" ))

Bayesian Estimate - Method Advice

## Build the JAGS data for the method-advice model.
## FIX: the original relied on c(factor, numeric) silently dropping the
## factor to its integer codes; since R 4.1.0 c() combines factors AS
## factors, which broke the == comparisons below. Coerce explicitly.
## With (presumably alphabetical — confirm) levels: Aptitude = 1,
## Effort = 2, Strategy = 3; Fail = 1, Success = 2.
## Six extra rows (one per design cell, outcome = 1) are appended —
## presumably pseudo-observations to avoid the complete separation seen
## in the glm2 fits; confirm with the original analyst.
attribution <- c(as.integer(attobo2$attribution), c(3, 3, 2, 2, 1, 1))

strategy <- ifelse(attribution == 3, 1, 0)

effort <- ifelse(attribution == 2, 1, 0)

valence <- c(as.integer(attobo2$valence), c(1, 2, 1, 2, 1, 2))

valence <- ifelse(valence == 2, 1, 0)

method <- c(attobo2$method, c(1, 1, 1, 1, 1, 1))

attobo2.datjags <- list(strategy = strategy, effort = effort, method = method,
                        N = length(strategy), valence = valence)

Model Specification

## JAGS model: Bernoulli-logit regression of method advice on the
## attribution dummies, valence, and their interactions. The body is
## deparsed by R2jags into a JAGS model file, so keep it JAGS-legal.
attobo2.model <- function() {
  for(i in 1:N){
  
    ## Likelihood: each advice indicator is Bernoulli with logit-linear p.
    method[i] ~ dbern(p[i])
    logit(p[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + 
      b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # b[1] - intercept
  # b[2] - effort (aptitude)
  # b[3] - strategy (aptitude)
  # b[4] - success (failure)
  # b[5] - effort*valence
  # b[6] - strategy*valence
  
  #priors
  ## Weakly-informative: in JAGS dnorm's 2nd arg is a PRECISION,
  ## so 0.0025 corresponds to sd = 20 on the logit scale.
 for(i in 1:6){
   
   b[i] ~ dnorm(0, 0.0025) 
 }
  
  
  #calculations
  
  ## Raw residual per observation, monitored as a fit check.
  for (i in 1:N) {
    
    error[i] <- p[i] - method[i]
    
  }
  
}
## Monitor the coefficients, fitted probabilities, and residuals.
attobo2.params <- c("b", "p", "error")

## Over-dispersed starting values for the three chains.
attobo2.inits1 <- list(b = rep(1, 6))
attobo2.inits2 <- list(b = rep(0, 6))
attobo2.inits3 <- list(b = rep(-1, 6))
attobo2.inits <- list(attobo2.inits1, attobo2.inits2, attobo2.inits3)
attobo2.inits

set.seed(1128)
attobo2.fit <- jags(
  data = attobo2.datjags,
  inits = attobo2.inits,
  parameters.to.save = attobo2.params,
  n.chains = 3,
  n.iter = 300000,
  n.burnin = 100000,
  n.thin = 10,
  model.file = attobo2.model
)

## Extra updating passes, then convert to a coda object for diagnostics.
attobo2.fit.upd <- update(attobo2.fit, n.iter = 1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)

save(attobo2.fit.mcmc, file = "attobo2.method.mcmc.RData")

summary(attobo2.fit.mcmc)

Convergence Diagnostics

## Reload the saved posterior and inspect convergence.
load("attobo2.method.mcmc.RData")

## Trace/density plots for the 6 coefficients (+ deviance).
MCMCtrace(attobo2.fit.mcmc[, 1:7], ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot per chain.
autocorr.plot(attobo2.fit.mcmc[1][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][, 1:7], ask = FALSE)

## Flatten all chains into one data frame of posterior draws.
attobo2.out.pp <- as.data.frame(as.matrix(attobo2.fit.mcmc))

## saving data
save(attobo2.out.pp, file = "attobo2.method.pp.RData")
## 292 participants + the 6 appended cells = 298 observations.
id <- 1:298

attobo2.index <- data.frame(attribution, valence, id)
load("attobo2.method.pp.RData")

## Fitted-probability columns p[1]..p[298]; fixed = T makes "[" literal,
## and mixedsort restores numeric column order (p[2] before p[10]).
pred.method <- attobo2.out.pp[, grep("p[", colnames(attobo2.out.pp), fixed = T)]
pred.method <- pred.method[, c(mixedsort(names(pred.method)))]

## Per-cell posterior summaries. apply.mean()/as.report() come from
## 'Useful Code V2.R' — presumably a per-draw mean over the cell's columns
## and a Mean/SD/quantile summary; confirm in that file.
## Strategy x Success cell.
strategy.success.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 1) 

strategy.success.index <- strategy.success.index[, "id"]


strategy.success <- pred.method[, strategy.success.index]

strategy.success.method.report <- as.report(apply.mean(strategy.success))

## Strategy x Failure cell.
strategy.fail.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 0) 

strategy.fail.index <- strategy.fail.index[, "id"]

strategy.fail <- pred.method[, strategy.fail.index]

strategy.fail.method.report <- as.report(apply.mean(strategy.fail))
strategy.fail.method.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.48  0.07  0.34  0.48  0.62
## Effort x Success cell.
effort.success.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 1)

effort.success.index <- effort.success.index[, "id"]

effort.success <- pred.method[, effort.success.index]

effort.success.method.report <- as.report(apply.mean(effort.success))
effort.success.method.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
## Effort x Failure cell.
effort.fail.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 0) 

effort.fail.index <- effort.fail.index[, "id"]

effort.fail <- pred.method[, effort.fail.index]

effort.fail.method.report <- as.report(apply.mean(effort.fail))
effort.fail.method.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.12  0.04  0.04  0.11  0.21
## Aptitude x Success cell.
aptitude.success.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 1) 

aptitude.success.index <- aptitude.success.index[,"id"]

aptitude.success <- pred.method[, aptitude.success.index]

aptitude.success.method.report <- as.report(apply.mean(aptitude.success))
aptitude.success.method.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
## Aptitude x Failure cell.
aptitude.fail.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 0) 

aptitude.fail.index <- aptitude.fail.index[, "id"] 

aptitude.fail <- pred.method[, aptitude.fail.index]

aptitude.fail.method.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.method.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.13  0.05  0.05  0.13  0.24
## Residual check: posterior-mean error per observation should be ~0.
error.method <- attobo2.out.pp[, grep("error[", colnames(attobo2.out.pp), fixed = T)]

error <- as.vector(unlist(lapply(error.method, mean)))
mean(error)
## [1] -4.968397e-05
plot(density(error))

plotting the graph

## estimated count for each participant

## Row indices of the failure-cell participants for each attribution.
strategy.id <- as.numeric(unlist(which(attobo2$attribution == "Strategy" & attobo2$valence == "Fail")))
effort.id <- as.numeric(unlist(which(attobo2$attribution == "Effort"& attobo2$valence == "Fail")))
aptitude.id <- as.numeric(unlist(which(attobo2$attribution == "Aptitude"& attobo2$valence == "Fail")))

## Posterior draw of the mean Pr(method advice) in each failure cell.
pred.method$strategy.sim <- apply(pred.method[, strategy.id], 1, mean)
pred.method$effort.sim <- apply(pred.method[, effort.id], 1, mean)
pred.method$aptitude.sim <- apply(pred.method[, aptitude.id], 1, mean)

quantile(pred.method$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.3414874 0.4785708 0.6193511
quantile(pred.method$effort.sim, probs = c(.025, .5, .975))
##       2.5%        50%      97.5% 
## 0.04467494 0.11034440 0.21285400
quantile(pred.method$aptitude.sim, probs = c(.025, .5, .975))
##       2.5%        50%      97.5% 
## 0.05095175 0.12507805 0.24110482
## FIX: the original plotted `attobo2.posterior.coefs.long` before it was
## ever created in this section (it is first built later, from the
## bruteforce posteriors). Build it here from pred.method, mirroring the
## other sections.
attobo2.posterior.coefs <- pred.method[, c("strategy.sim", "effort.sim", "aptitude.sim")]
names(attobo2.posterior.coefs) <- c("Strategy", "Effort", "Aptitude")

attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)

library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) + 
  stat_density_ridges(quantile_lines = TRUE, 
                      quantiles = c(0.025, 0.5, 0.975), 
                      alpha = 0.7)

## Per-draw cell means for every attribution x valence cell.
aptitude.fail.mean <- apply.mean(aptitude.fail)

aptitude.success.mean <- apply.mean(aptitude.success)

effort.fail.mean <- apply.mean(effort.fail)

effort.success.mean <- apply.mean(effort.success)

strategy.fail.mean <- apply.mean(strategy.fail)

strategy.success.mean <- apply.mean(strategy.success)

attobo2.pp <- cbind(aptitude.fail.mean, aptitude.success.mean, effort.fail.mean, effort.success.mean, strategy.fail.mean, strategy.success.mean)

## Short placeholder names so gather() gets unique columns.
colnames(attobo2.pp) <- c("pp1", "pp2", "pp3", "pp4", "pp5", "pp6")

attobo2.plot <- gather(as.data.frame(attobo2.pp))

attobo2.plot$key2 <- attobo2.plot$key

## FIX: the original used recode() with car's "'a'='b'; ..." string syntax,
## which only works while car::recode masks dplyr::recode (both packages
## are attached). Explicit lookup tables are independent of attach order.
attr_map <- c(pp1 = "Aptitude", pp2 = "Aptitude", pp3 = "Effort",
              pp4 = "Effort", pp5 = "Strategy", pp6 = "Strategy")
valence_map <- c(pp1 = "Failure", pp2 = "Success", pp3 = "Failure",
                 pp4 = "Success", pp5 = "Failure", pp6 = "Success")
attobo2.plot$key <- unname(attr_map[attobo2.plot$key])
attobo2.plot$key2 <- unname(valence_map[attobo2.plot$key2])

colnames(attobo2.plot) <- c("Attribution", "Pr(Method Advice)", "Outcome Valence")

## Posterior mean and 95% credible interval per cell.
attobo2.posterior.coefs.sum <- summarize(group_by(attobo2.plot, Attribution, `Outcome Valence`),
                                        mean_coef = mean(`Pr(Method Advice)`),
                                        lower_coef = quantile(`Pr(Method Advice)`, probs = c(0.025)),
                                        upper_coef = quantile(`Pr(Method Advice)`, probs = c(0.975)))

method.summary <- attobo2.posterior.coefs.sum

Bayesian Estimate - Bruteforce Advice

## Build the JAGS data for the bruteforce-advice model.
## FIX: explicit as.integer() — c(factor, numeric) no longer drops the
## factor to integer codes in R >= 4.1 (c() combines factors as factors),
## which broke the == comparisons below. With (presumably alphabetical —
## confirm) levels: Aptitude = 1, Effort = 2, Strategy = 3; Fail = 1,
## Success = 2. Six pseudo-observations (one per cell, outcome = 1) are
## appended, as in the method section.
attribution <- c(as.integer(attobo2$attribution), c(3, 3, 2, 2, 1, 1))

strategy <- ifelse(attribution == 3, 1, 0)

effort <- ifelse(attribution == 2, 1, 0)

valence <- c(as.integer(attobo2$valence), c(1, 2, 1, 2, 1, 2))

valence <- ifelse(valence == 2, 1, 0)

bruteforce <- c(attobo2$bruteforce, c(1, 1, 1, 1, 1, 1))

attobo2.datjags <- list(strategy = strategy, effort = effort, bruteforce = bruteforce,
                        N = length(strategy), valence = valence)
attobo2.datjags

Model Specification

## JAGS model: Bernoulli-logit regression of bruteforce advice on the
## attribution dummies, valence, and their interactions. The body is
## deparsed by R2jags into a JAGS model file, so keep it JAGS-legal.
attobo2.model <- function() {
  for(i in 1:N){
  
    ## Likelihood: each advice indicator is Bernoulli with logit-linear p.
    bruteforce[i] ~ dbern(p[i])
    logit(p[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + 
      b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # b[1] - intercept
  # b[2] - effort (aptitude)
  # b[3] - strategy (aptitude)
  # b[4] - success (failure)
  # b[5] - effort*valence
  # b[6] - strategy*valence
  
  #priors
  ## Weakly-informative: dnorm's 2nd arg is a PRECISION in JAGS,
  ## so 0.0025 corresponds to sd = 20 on the logit scale.
 for(i in 1:6){
   
   b[i] ~ dnorm(0, 0.0025) 
 }
  
  
  #calculations
  
  ## Raw residual per observation, monitored as a fit check.
  for (i in 1:N) {
    
    error[i] <- p[i] - bruteforce[i]
    
  }
  
}
## Monitor the coefficients, fitted probabilities, and residuals.
attobo2.params <- c("b", "p", "error")

## Over-dispersed starting values for the three chains.
attobo2.inits1 <- list(b = rep(1, 6))
attobo2.inits2 <- list(b = rep(0, 6))
attobo2.inits3 <- list(b = rep(-1, 6))
attobo2.inits <- list(attobo2.inits1, attobo2.inits2, attobo2.inits3)
attobo2.inits

set.seed(1128)
attobo2.fit <- jags(
  data = attobo2.datjags,
  inits = attobo2.inits,
  parameters.to.save = attobo2.params,
  n.chains = 3,
  n.iter = 300000,
  n.burnin = 100000,
  n.thin = 10,
  model.file = attobo2.model
)

## Extra updating passes, then convert to a coda object for diagnostics.
attobo2.fit.upd <- update(attobo2.fit, n.iter = 1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)

save(attobo2.fit.mcmc, file = "attobo2.bruteforce.mcmc.RData")

summary(attobo2.fit.mcmc)

Convergence Diagnostics

## Reload the saved posterior and inspect convergence.
load("attobo2.bruteforce.mcmc.RData")                
## Trace/density plots for the 6 coefficients (+ deviance).
MCMCtrace(attobo2.fit.mcmc[, 1:7], ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot per chain.
autocorr.plot(attobo2.fit.mcmc[1][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][, 1:7], ask = FALSE)

## Flatten all chains into one data frame of posterior draws.
attobo2.out.pp <- as.data.frame(as.matrix(attobo2.fit.mcmc))

## saving data
save(attobo2.out.pp, file = "attobo2.bruteforce.pp.RData")
## 292 participants + the 6 appended cells = 298 observations.
id <- 1:298

attobo2.index <- data.frame(attribution, valence, id)
load("attobo2.bruteforce.pp.RData")

## Fitted-probability columns p[1]..p[298]; fixed = T makes "[" literal,
## and mixedsort restores numeric column order (p[2] before p[10]).
pred.bruteforce <- attobo2.out.pp[, grep("p[", colnames(attobo2.out.pp), fixed = T)]
pred.bruteforce <- pred.bruteforce[, c(mixedsort(names(pred.bruteforce)))]

## Per-cell posterior summaries; apply.mean()/as.report() come from
## 'Useful Code V2.R' — confirm their definitions there.
## Strategy x Success cell.
strategy.success.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 1) 

strategy.success.index <- strategy.success.index[, "id"]


strategy.success <- pred.bruteforce[, strategy.success.index]

strategy.success.bruteforce.report <- as.report(apply.mean(strategy.success))

## Strategy x Failure cell.
strategy.fail.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 0) 

strategy.fail.index <- strategy.fail.index[, "id"]

strategy.fail <- pred.bruteforce[, strategy.fail.index]

strategy.fail.bruteforce.report <- as.report(apply.mean(strategy.fail))
strategy.fail.bruteforce.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.08  0.04  0.02  0.08  0.17
## Effort x Success cell.
effort.success.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 1)

effort.success.index <- effort.success.index[, "id"]

effort.success <- pred.bruteforce[, effort.success.index]

effort.success.bruteforce.report <- as.report(apply.mean(effort.success))
effort.success.bruteforce.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.06  0.03  0.01  0.05  0.13
## Effort x Failure cell.
effort.fail.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 0) 

effort.fail.index <- effort.fail.index[, "id"]

effort.fail <- pred.bruteforce[, effort.fail.index]

effort.fail.bruteforce.report <- as.report(apply.mean(effort.fail))
effort.fail.bruteforce.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.23  0.06  0.13  0.23  0.35
## Aptitude x Success cell.
aptitude.success.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 1) 

aptitude.success.index <- aptitude.success.index[,"id"]

aptitude.success <- pred.bruteforce[, aptitude.success.index]

aptitude.success.bruteforce.report <- as.report(apply.mean(aptitude.success))
aptitude.success.bruteforce.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
## Aptitude x Failure cell.
aptitude.fail.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 0) 

aptitude.fail.index <- aptitude.fail.index[, "id"] 

aptitude.fail <- pred.bruteforce[, aptitude.fail.index]

aptitude.fail.bruteforce.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.bruteforce.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.09  0.04  0.03  0.08  0.18
## Residual check: posterior-mean error per observation should be ~0.
error.bruteforce <- attobo2.out.pp[, grep("error[", colnames(attobo2.out.pp), fixed = T)]

error <- as.vector(unlist(lapply(error.bruteforce, mean)))
mean(error)
## [1] -2.229667e-05
plot(density(error))

plotting the graph

## Row indices of the failure-cell participants for each attribution.
strategy.id <- as.numeric(which(attobo2$attribution == "Strategy" & attobo2$valence == "Fail"))
effort.id <- as.numeric(which(attobo2$attribution == "Effort" & attobo2$valence == "Fail"))
aptitude.id <- as.numeric(which(attobo2$attribution == "Aptitude" & attobo2$valence == "Fail"))

## Posterior draw of the mean Pr(bruteforce advice) in each failure cell.
pred.bruteforce$strategy.sim <- rowMeans(pred.bruteforce[, strategy.id])
pred.bruteforce$effort.sim <- rowMeans(pred.bruteforce[, effort.id])
pred.bruteforce$aptitude.sim <- rowMeans(pred.bruteforce[, aptitude.id])

quantile(pred.bruteforce$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.0238007 0.0776520 0.1739879
quantile(pred.bruteforce$effort.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.1282305 0.2275187 0.3527334
quantile(pred.bruteforce$aptitude.sim, probs = c(.025, .5, .975))
##       2.5%        50%      97.5% 
## 0.02510845 0.08068218 0.18260521

## Ridge plot of the three failure-cell posteriors.
attobo2.posterior.coefs <- pred.bruteforce[, c("strategy.sim", "effort.sim", "aptitude.sim")]
names(attobo2.posterior.coefs) <- c("Strategy", "Effort", "Aptitude")

attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)

library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) +
  stat_density_ridges(quantile_lines = TRUE,
                      quantiles = c(0.025, 0.5, 0.975),
                      alpha = 0.7)

## Per-draw cell means for every attribution x valence cell.
aptitude.fail.mean <- apply.mean(aptitude.fail)

aptitude.success.mean <- apply.mean(aptitude.success)

effort.fail.mean <- apply.mean(effort.fail)

effort.success.mean <- apply.mean(effort.success)

strategy.fail.mean <- apply.mean(strategy.fail)

strategy.success.mean <- apply.mean(strategy.success)

attobo2.pp <- cbind(aptitude.fail.mean, aptitude.success.mean, effort.fail.mean, effort.success.mean, strategy.fail.mean, strategy.success.mean)

## Short placeholder names so gather() gets unique columns.
colnames(attobo2.pp) <- c("pp1", "pp2", "pp3", "pp4", "pp5", "pp6")

attobo2.plot <- gather(as.data.frame(attobo2.pp))

attobo2.plot$key2 <- attobo2.plot$key

## FIX: the original used recode() with car's "'a'='b'; ..." string syntax,
## which only works while car::recode masks dplyr::recode (both packages
## are attached). Explicit lookup tables are independent of attach order.
attr_map <- c(pp1 = "Aptitude", pp2 = "Aptitude", pp3 = "Effort",
              pp4 = "Effort", pp5 = "Strategy", pp6 = "Strategy")
valence_map <- c(pp1 = "Failure", pp2 = "Success", pp3 = "Failure",
                 pp4 = "Success", pp5 = "Failure", pp6 = "Success")
attobo2.plot$key <- unname(attr_map[attobo2.plot$key])
attobo2.plot$key2 <- unname(valence_map[attobo2.plot$key2])

colnames(attobo2.plot) <- c("Attribution", "Pr(Bruteforce Advice)", "Outcome Valence")

## Posterior mean and 95% credible interval per cell.
attobo2.posterior.coefs.sum <- summarize(group_by(attobo2.plot, Attribution, `Outcome Valence`),
                                        mean_coef = mean(`Pr(Bruteforce Advice)`),
                                        lower_coef = quantile(`Pr(Bruteforce Advice)`, probs = c(0.025)),
                                        upper_coef = quantile(`Pr(Bruteforce Advice)`, probs = c(0.975)))

brute.summary <- attobo2.posterior.coefs.sum

Bayesian Estimate - Redo Advice

## Build the JAGS data for the redo-advice model.
## FIX: explicit as.integer() — c(factor, numeric) no longer drops the
## factor to integer codes in R >= 4.1 (c() combines factors as factors),
## which broke the == comparisons below. With (presumably alphabetical —
## confirm) levels: Aptitude = 1, Effort = 2, Strategy = 3; Fail = 1,
## Success = 2. Six pseudo-observations (one per cell, outcome = 1) are
## appended, as in the method section.
attribution <- c(as.integer(attobo2$attribution), c(3, 3, 2, 2, 1, 1))

strategy <- ifelse(attribution == 3, 1, 0)

effort <- ifelse(attribution == 2, 1, 0)

valence <- c(as.integer(attobo2$valence), c(1, 2, 1, 2, 1, 2))

valence <- ifelse(valence == 2, 1, 0)

redo <- c(attobo2$redo, c(1, 1, 1, 1, 1, 1))

attobo2.datjags <- list(strategy = strategy, effort = effort, redo = redo,
                        N = length(strategy), valence = valence)
attobo2.datjags

Model Specification

## JAGS model: Bernoulli-logit regression of redo advice on the
## attribution dummies, valence, and their interactions. The body is
## deparsed by R2jags into a JAGS model file, so keep it JAGS-legal.
attobo2.model <- function() {
  for(i in 1:N){
  
    ## Likelihood: each advice indicator is Bernoulli with logit-linear p.
    redo[i] ~ dbern(p[i])
    logit(p[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + 
      b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # b[1] - intercept
  # b[2] - effort (aptitude)
  # b[3] - strategy (aptitude)
  # b[4] - success (failure)
  # b[5] - effort*valence
  # b[6] - strategy*valence
  
  #priors
  ## Weakly-informative: dnorm's 2nd arg is a PRECISION in JAGS,
  ## so 0.0025 corresponds to sd = 20 on the logit scale.
 for(i in 1:6){
   
   b[i] ~ dnorm(0, 0.0025) 
 }
  
  
  #calculations
  
  ## Raw residual per observation, monitored as a fit check.
  for (i in 1:N) {
    
    error[i] <- p[i] - redo[i]
    
  }
  
}
## Monitor the coefficients, fitted probabilities, and residuals.
attobo2.params <- c("b", "p", "error")

## Over-dispersed starting values for the three chains.
attobo2.inits1 <- list(b = rep(1, 6))
attobo2.inits2 <- list(b = rep(0, 6))
attobo2.inits3 <- list(b = rep(-1, 6))
attobo2.inits <- list(attobo2.inits1, attobo2.inits2, attobo2.inits3)
attobo2.inits

set.seed(1128)
attobo2.fit <- jags(
  data = attobo2.datjags,
  inits = attobo2.inits,
  parameters.to.save = attobo2.params,
  n.chains = 3,
  n.iter = 300000,
  n.burnin = 100000,
  n.thin = 10,
  model.file = attobo2.model
)

## Extra updating passes, then convert to a coda object for diagnostics.
attobo2.fit.upd <- update(attobo2.fit, n.iter = 1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)

save(attobo2.fit.mcmc, file = "attobo2.redo.mcmc.RData")

summary(attobo2.fit.mcmc)

Convergence Diagnostics

## Reload the saved posterior and inspect convergence.
load("attobo2.redo.mcmc.RData")              
                        
## Trace/density plots for the 6 coefficients (+ deviance).
MCMCtrace(attobo2.fit.mcmc[, 1:7], ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot per chain.
autocorr.plot(attobo2.fit.mcmc[1][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][, 1:7], ask = FALSE)

## Flatten all chains into one data frame of posterior draws.
attobo2.out.pp <- as.data.frame(as.matrix(attobo2.fit.mcmc))

## saving data
save(attobo2.out.pp, file = "attobo2.redo.pp.RData")
## 292 participants + the 6 appended cells = 298 observations.
id <- 1:298

attobo2.index <- data.frame(attribution, valence, id)
load("attobo2.redo.pp.RData")

## Fitted-probability columns p[1]..p[298]; fixed = T makes "[" literal,
## and mixedsort restores numeric column order (p[2] before p[10]).
pred.redo <- attobo2.out.pp[, grep("p[", colnames(attobo2.out.pp), fixed = T)]
pred.redo <- pred.redo[, c(mixedsort(names(pred.redo)))]

## Per-cell posterior summaries; apply.mean()/as.report() come from
## 'Useful Code V2.R' — confirm their definitions there.
## Strategy x Success cell.
strategy.success.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 1) 

strategy.success.index <- strategy.success.index[, "id"]


strategy.success <- pred.redo[, strategy.success.index]

strategy.success.redo.report <- as.report(apply.mean(strategy.success))

## Strategy x Failure cell.
strategy.fail.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 0) 

strategy.fail.index <- strategy.fail.index[, "id"]

strategy.fail <- pred.redo[, strategy.fail.index]

strategy.fail.redo.report <- as.report(apply.mean(strategy.fail))
strategy.fail.redo.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.25  0.06  0.14  0.25  0.38
## Effort x Success cell.
effort.success.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 1)

effort.success.index <- effort.success.index[, "id"]

effort.success <- pred.redo[, effort.success.index]

effort.success.redo.report <- as.report(apply.mean(effort.success))
effort.success.redo.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
## Effort x Failure cell.
effort.fail.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 0) 

effort.fail.index <- effort.fail.index[, "id"]

effort.fail <- pred.redo[, effort.fail.index]

effort.fail.redo.report <- as.report(apply.mean(effort.fail))
effort.fail.redo.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.42  0.07  0.29  0.42  0.56
## Aptitude x Success cell.
aptitude.success.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 1) 

aptitude.success.index <- aptitude.success.index[,"id"]

aptitude.success <- pred.redo[, aptitude.success.index]

aptitude.success.redo.report <- as.report(apply.mean(aptitude.success))
aptitude.success.redo.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
## Aptitude x Failure cell.
aptitude.fail.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 0) 

aptitude.fail.index <- aptitude.fail.index[, "id"] 

aptitude.fail <- pred.redo[, aptitude.fail.index]

aptitude.fail.redo.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.redo.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.11  0.05  0.04  0.10  0.21
## Residual check: posterior-mean error per observation should be ~0.
error.redo <- attobo2.out.pp[, grep("error[", colnames(attobo2.out.pp), fixed = T)]


error <- as.vector(unlist(lapply(error.redo, mean)))
mean(error)
## [1] 1.955135e-05
plot(density(error))

plotting the graph

## estimated count for each participant

## Row indices of the failure-cell participants for each attribution.
strategy.id <- as.numeric(unlist(which(attobo2$attribution == "Strategy" & attobo2$valence == "Fail")))
effort.id <- as.numeric(unlist(which(attobo2$attribution == "Effort"& attobo2$valence == "Fail")))
aptitude.id <- as.numeric(unlist(which(attobo2$attribution == "Aptitude"& attobo2$valence == "Fail")))

## Posterior draw of the mean Pr(redo advice) in each failure cell.
pred.redo$strategy.sim <- apply(pred.redo[, strategy.id], 1, mean)
pred.redo$effort.sim <- apply(pred.redo[, effort.id], 1, mean)
pred.redo$aptitude.sim <- apply(pred.redo[, aptitude.id], 1, mean)

quantile(pred.redo$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.1398839 0.2463713 0.3801704
quantile(pred.redo$effort.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.2936669 0.4222419 0.5580070
quantile(pred.redo$aptitude.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.0374378 0.1033730 0.2122305
## FIX: rebuild the plotting data from pred.redo — the original reused the
## `attobo2.posterior.coefs.long` left over from the bruteforce section,
## so the ridge plot showed bruteforce (not redo) posteriors.
attobo2.posterior.coefs <- pred.redo[, c("strategy.sim", "effort.sim", "aptitude.sim")]
names(attobo2.posterior.coefs) <- c("Strategy", "Effort", "Aptitude")

attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)

library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) + 
  stat_density_ridges(quantile_lines = TRUE, 
                      quantiles = c(0.025, 0.5, 0.975), 
                      alpha = 0.7)

## Per-draw cell means for every attribution x valence cell.
aptitude.fail.mean <- apply.mean(aptitude.fail)

aptitude.success.mean <- apply.mean(aptitude.success)

effort.fail.mean <- apply.mean(effort.fail)

effort.success.mean <- apply.mean(effort.success)

strategy.fail.mean <- apply.mean(strategy.fail)

strategy.success.mean <- apply.mean(strategy.success)

attobo2.pp <- cbind(aptitude.fail.mean, aptitude.success.mean, effort.fail.mean, effort.success.mean, strategy.fail.mean, strategy.success.mean)

## Short placeholder names so gather() gets unique columns.
colnames(attobo2.pp) <- c("pp1", "pp2", "pp3", "pp4", "pp5", "pp6")

attobo2.plot <- gather(as.data.frame(attobo2.pp))

attobo2.plot$key2 <- attobo2.plot$key

## FIX: the original used recode() with car's "'a'='b'; ..." string syntax,
## which only works while car::recode masks dplyr::recode (both packages
## are attached). Explicit lookup tables are independent of attach order.
attr_map <- c(pp1 = "Aptitude", pp2 = "Aptitude", pp3 = "Effort",
              pp4 = "Effort", pp5 = "Strategy", pp6 = "Strategy")
valence_map <- c(pp1 = "Failure", pp2 = "Success", pp3 = "Failure",
                 pp4 = "Success", pp5 = "Failure", pp6 = "Success")
attobo2.plot$key <- unname(attr_map[attobo2.plot$key])
attobo2.plot$key2 <- unname(valence_map[attobo2.plot$key2])

colnames(attobo2.plot) <- c("Attribution", "Pr(Redo Advice)", "Outcome Valence")

## Posterior mean and 95% credible interval per cell.
attobo2.posterior.coefs.sum <- summarize(group_by(attobo2.plot, Attribution, `Outcome Valence`),
                                        mean_coef = mean(`Pr(Redo Advice)`),
                                        lower_coef = quantile(`Pr(Redo Advice)`, probs = c(0.025)),
                                        upper_coef = quantile(`Pr(Redo Advice)`, probs = c(0.975)))

redo.summary <- attobo2.posterior.coefs.sum

Bayesian Estimate - Redo with Feedback Advice

## Data prep for the redo-with-feedback logit model. Six pseudo-observations
## (one success per attribution x valence cell) are appended, apparently to
## guard against separation — TODO confirm intent.
## BUG FIX: the original relied on c(factor, numeric) collapsing the factor to
## its integer codes; since R 4.1, c() keeps the factor class, so the == 3 /
## == 2 comparisons matched level labels instead of codes. Convert explicitly
## (levels are alphabetical: Aptitude = 1, Effort = 2, Strategy = 3;
## Fail = 1, Success = 2).
attribution <- c(as.integer(attobo2$attribution), c(3, 3, 2, 2, 1, 1))

strategy <- ifelse(attribution == 3, 1, 0)

effort <- ifelse(attribution == 2, 1, 0)

valence <- c(as.integer(attobo2$valence), c(1, 2, 1, 2, 1, 2))

valence <- ifelse(valence == 2, 1, 0)

redo.feedback <- c(attobo2$redo.feedback, c(1, 1, 1, 1, 1, 1))

attobo2.datjags <- list(strategy = strategy, effort = effort, redo.feedback = redo.feedback, N = length(strategy), valence = valence)
attobo2.datjags

Model Specification

## JAGS model for redo-with-feedback advice: Bernoulli outcome with a logit
## link on attribution (Aptitude reference), valence (Failure reference), and
## their interaction. Written as an R function only so R2jags can deparse it;
## BUGS semantics apply inside (comments are stripped at deparse time).
attobo2.model <- function() {
  # Likelihood: one Bernoulli trial per participant
  for(i in 1:N){
  
    redo.feedback[i] ~ dbern(p[i])
    logit(p[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + 
      b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # b[1] - intercept
  # b[2] - effort (aptitude)
  # b[3] - strategy (aptitude)
  # b[4] - success (failure)
  # b[5] - effort*valence
  # b[6] - strategy*valence
  
  # priors: vague normals (precision 0.0025, i.e. SD 20 on the logit scale)
 for(i in 1:6){
   
   b[i] ~ dnorm(0, 0.0025) 
 }
  
  
  # calculations: in-sample residual, monitored for model checking
  
  for (i in 1:N) {
    
    error[i] <- p[i] - redo.feedback[i]
    
  }
  
}
## Nodes to monitor: coefficients, fitted probabilities, residuals
attobo2.params <- c("b",  "p", "error")
## Over-dispersed starting values for the three chains
attobo2.inits1 <-  list( "b" = rep(1, 6))
attobo2.inits2 <- list( "b" = rep(0, 6))
attobo2.inits3 <- list( "b" = rep(-1, 6))
attobo2.inits <- list(attobo2.inits1, attobo2.inits2, attobo2.inits3)
attobo2.inits
set.seed(1128)
attobo2.fit <- jags(data = attobo2.datjags, inits = attobo2.inits, parameters.to.save = attobo2.params, 
                n.chains = 3, n.iter = 300000, n.burnin = 100000, model.file = attobo2.model, n.thin = 10)

## NOTE(review): both update() results go to the same name, so the first is
## discarded, and as.mcmc() below converts the original fit rather than
## attobo2.fit.upd — confirm this is intentional.
attobo2.fit.upd <- update(attobo2.fit, n.iter =1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)

## Persist the mcmc.list for the diagnostics section
save(attobo2.fit.mcmc, file = "attobo2.redo.feedback.mcmc.RData")

summary(attobo2.fit.mcmc)

Convergence Diagnostics

## Trace/density plots for the first seven monitored nodes
## (presumably b[1]-b[6] plus deviance under coda's column ordering)
load("attobo2.redo.feedback.mcmc.RData")

MCMCtrace(attobo2.fit.mcmc[, 1:7], ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot set per chain
autocorr.plot(attobo2.fit.mcmc[1][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][, 1:7], ask = FALSE)

## Flatten all chains into one draws-by-node data frame
attobo2.out.pp <- as.data.frame(as.matrix(as.mcmc(attobo2.fit)))

## saving data
save(attobo2.out.pp, file = "attobo2.redo.feedback.pp.RData")
## 292 respondents + 6 pseudo-observations = 298 — TODO confirm vs nrow(attobo2)
id <- 1:298

attobo2.index <- data.frame(attribution, valence, id)
load("attobo2.redo.feedback.pp.RData")

## Fitted-probability columns p[i], reordered numerically by participant
pred.redo.feedback <- attobo2.out.pp[, grep("p[", colnames(attobo2.out.pp), fixed = T)]
pred.redo.feedback <- pred.redo.feedback[, c(mixedsort(names(pred.redo.feedback)))]

## Cell-level posterior reports for the redo-with-feedback model.
## Codes in attobo2.index: attribution 1 = Aptitude, 2 = Effort,
## 3 = Strategy; valence 1 = Success, 0 = Failure.

strategy.success.index <- attobo2.index$id[attobo2.index$attribution == 3 & attobo2.index$valence == 1]
strategy.success <- pred.redo.feedback[, strategy.success.index]
strategy.success.redo.feedback.report <- as.report(apply.mean(strategy.success))

strategy.fail.index <- attobo2.index$id[attobo2.index$attribution == 3 & attobo2.index$valence == 0]
strategy.fail <- pred.redo.feedback[, strategy.fail.index]
strategy.fail.redo.feedback.report <- as.report(apply.mean(strategy.fail))
strategy.fail.redo.feedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.17  0.05  0.08  0.16  0.28

effort.success.index <- attobo2.index$id[attobo2.index$attribution == 2 & attobo2.index$valence == 1]
effort.success <- pred.redo.feedback[, effort.success.index]
effort.success.redo.feedback.report <- as.report(apply.mean(effort.success))
effort.success.redo.feedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07

effort.fail.index <- attobo2.index$id[attobo2.index$attribution == 2 & attobo2.index$valence == 0]
effort.fail <- pred.redo.feedback[, effort.fail.index]
effort.fail.redo.feedback.report <- as.report(apply.mean(effort.fail))
effort.fail.redo.feedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.04  0.03  0.00  0.03  0.10

aptitude.success.index <- attobo2.index$id[attobo2.index$attribution == 1 & attobo2.index$valence == 1]
aptitude.success <- pred.redo.feedback[, aptitude.success.index]
aptitude.success.redo.feedback.report <- as.report(apply.mean(aptitude.success))
aptitude.success.redo.feedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07

aptitude.fail.index <- attobo2.index$id[attobo2.index$attribution == 1 & attobo2.index$valence == 0]
aptitude.fail <- pred.redo.feedback[, aptitude.fail.index]
aptitude.fail.redo.feedback.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.redo.feedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.04  0.03  0.01  0.04  0.12
## Mean in-sample error per participant (feedback model); should sit near zero
error.redo.feedback <- attobo2.out.pp[, grep("error[", colnames(attobo2.out.pp), fixed = TRUE)]

error <- unname(vapply(error.redo.feedback, mean, numeric(1)))
mean(error)
## [1] -7.978423e-05
plot(density(error))

Plotting the Graph

## estimated count for each participant (feedback model, Failure cells)

## Row indices for each attribution x Failure cell
strategy.id <- as.numeric(which(attobo2$attribution == "Strategy" & attobo2$valence == "Fail"))
effort.id <- as.numeric(which(attobo2$attribution == "Effort" & attobo2$valence == "Fail"))
aptitude.id <- as.numeric(which(attobo2$attribution == "Aptitude" & attobo2$valence == "Fail"))

## Average participant-level draws within each cell, per MCMC draw
pred.redo.feedback$strategy.sim <- rowMeans(pred.redo.feedback[, strategy.id])
pred.redo.feedback$effort.sim <- rowMeans(pred.redo.feedback[, effort.id])
pred.redo.feedback$aptitude.sim <- rowMeans(pred.redo.feedback[, aptitude.id])

quantile(pred.redo.feedback$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.0762537 0.1611735 0.2824771
quantile(pred.redo.feedback$effort.sim, probs = c(.025, .5, .975))
##        2.5%         50%       97.5% 
## 0.004659729 0.032670631 0.103834084
quantile(pred.redo.feedback$aptitude.sim, probs = c(.025, .5, .975))
##        2.5%         50%       97.5% 
## 0.005708272 0.037399870 0.118863122

## Long format for the ridge plot
attobo2.posterior.coefs <- setNames(
  pred.redo.feedback[, c("strategy.sim", "effort.sim", "aptitude.sim")],
  c("Strategy", "Effort", "Aptitude")
)
attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)
library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) + 
  stat_density_ridges(quantile_lines = TRUE, 
                      quantiles = c(0.025, 0.5, 0.975), 
                      alpha = 0.7)

Bayesian Estimate - Redo without Feedback Advice

## Data prep for the redo-without-feedback logit model. Six pseudo-successes
## (one per attribution x valence cell) are appended, apparently to guard
## against separation — TODO confirm intent.
## BUG FIX: the original relied on c(factor, numeric) collapsing the factor to
## its integer codes; since R 4.1, c() keeps the factor class, so == 3 / == 2
## matched level labels instead of codes. Convert explicitly (levels are
## alphabetical: Aptitude = 1, Effort = 2, Strategy = 3; Fail = 1, Success = 2).
attribution <- c(as.integer(attobo2$attribution), c(3, 3, 2, 2, 1, 1))

strategy <- ifelse(attribution == 3, 1, 0)

effort <- ifelse(attribution == 2, 1, 0)

valence <- c(as.integer(attobo2$valence), c(1, 2, 1, 2, 1, 2))

valence <- ifelse(valence == 2, 1, 0)

redo.nofeedback <- c(attobo2$redo.nofeedback, c(1, 1, 1, 1, 1, 1))

attobo2.datjags <- list(strategy = strategy, effort = effort, redo.nofeedback = redo.nofeedback, N = length(strategy), valence = valence)
attobo2.datjags

Model Specification

## JAGS model for redo-without-feedback advice: Bernoulli outcome with a
## logit link on attribution (Aptitude reference), valence (Failure
## reference), and their interaction. Written as an R function only so R2jags
## can deparse it; BUGS semantics apply inside (comments stripped at deparse).
attobo2.model <- function() {
  # Likelihood: one Bernoulli trial per participant
  for(i in 1:N){
  
    redo.nofeedback[i] ~ dbern(p[i])
    logit(p[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + 
      b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # b[1] - intercept
  # b[2] - effort (aptitude)
  # b[3] - strategy (aptitude)
  # b[4] - success (failure)
  # b[5] - effort*valence
  # b[6] - strategy*valence
  
  # priors: vague normals (precision 0.0025, i.e. SD 20 on the logit scale)
 for(i in 1:6){
   
   b[i] ~ dnorm(0, 0.0025) 
 }
  
  
  # calculations: in-sample residual, monitored for model checking
  
  for (i in 1:N) {
    
    error[i] <- p[i] - redo.nofeedback[i]
    
  }
  
}
## Nodes to monitor: coefficients, fitted probabilities, residuals
attobo2.params <- c("b",  "p", "error")
## Over-dispersed starting values for the three chains
attobo2.inits1 <-  list( "b" = rep(1, 6))
attobo2.inits2 <- list( "b" = rep(0, 6))
attobo2.inits3 <- list( "b" = rep(-1, 6))
attobo2.inits <- list(attobo2.inits1, attobo2.inits2, attobo2.inits3)
attobo2.inits
set.seed(1128)
attobo2.fit <- jags(data = attobo2.datjags, inits = attobo2.inits, parameters.to.save = attobo2.params, 
                n.chains = 3, n.iter = 300000, n.burnin = 100000, model.file = attobo2.model, n.thin = 10)

## NOTE(review): the first update() result is immediately overwritten by
## autojags(), and as.mcmc() converts the original fit rather than
## attobo2.fit.upd — confirm this is intentional.
attobo2.fit.upd <- update(attobo2.fit, n.iter =1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)

## Persist the mcmc.list for the diagnostics section
save(attobo2.fit.mcmc, file = "attobo2.redo.nofeedback.mcmc.RData")


summary(attobo2.fit.mcmc)

Convergence Diagnostics

## Trace/density plots for the first seven monitored nodes
## (presumably b[1]-b[6] plus deviance under coda's column ordering)
load("attobo2.redo.nofeedback.mcmc.RData")                     
MCMCtrace(attobo2.fit.mcmc[, 1:7], ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot set per chain
autocorr.plot(attobo2.fit.mcmc[1][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][, 1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][, 1:7], ask = FALSE)

## Flatten all chains into one draws-by-node data frame
attobo2.out.pp <- as.data.frame(as.matrix(as.mcmc(attobo2.fit)))

## saving data
save(attobo2.out.pp, file = "attobo2.redo.nofeedback.pp.RData")
## 292 respondents + 6 pseudo-observations = 298 — TODO confirm vs nrow(attobo2)
id <- 1:298

attobo2.index <- data.frame(attribution, valence, id)
load("attobo2.redo.nofeedback.pp.RData")

## Fitted-probability columns p[i], reordered numerically by participant
pred.redo.nofeedback <- attobo2.out.pp[, grep("p[", colnames(attobo2.out.pp), fixed = T)]
pred.redo.nofeedback <- pred.redo.nofeedback[, c(mixedsort(names(pred.redo.nofeedback)))]

## Cell-level posterior reports for the redo-without-feedback model.
## Codes in attobo2.index: attribution 1 = Aptitude, 2 = Effort,
## 3 = Strategy; valence 1 = Success, 0 = Failure.

strategy.success.index <- attobo2.index$id[attobo2.index$attribution == 3 & attobo2.index$valence == 1]
strategy.success <- pred.redo.nofeedback[, strategy.success.index]
strategy.success.redo.nofeedback.report <- as.report(apply.mean(strategy.success))

strategy.fail.index <- attobo2.index$id[attobo2.index$attribution == 3 & attobo2.index$valence == 0]
strategy.fail <- pred.redo.nofeedback[, strategy.fail.index]
strategy.fail.redo.nofeedback.report <- as.report(apply.mean(strategy.fail))
strategy.fail.redo.nofeedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.10  0.04  0.04  0.10  0.20

effort.success.index <- attobo2.index$id[attobo2.index$attribution == 2 & attobo2.index$valence == 1]
effort.success <- pred.redo.nofeedback[, effort.success.index]
effort.success.redo.nofeedback.report <- as.report(apply.mean(effort.success))
effort.success.redo.nofeedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07

effort.fail.index <- attobo2.index$id[attobo2.index$attribution == 2 & attobo2.index$valence == 0]
effort.fail <- pred.redo.nofeedback[, effort.fail.index]
effort.fail.redo.nofeedback.report <- as.report(apply.mean(effort.fail))
effort.fail.redo.nofeedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.40  0.07  0.28  0.40  0.54

aptitude.success.index <- attobo2.index$id[attobo2.index$attribution == 1 & attobo2.index$valence == 1]
aptitude.success <- pred.redo.nofeedback[, aptitude.success.index]
aptitude.success.redo.nofeedback.report <- as.report(apply.mean(aptitude.success))
aptitude.success.redo.nofeedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07

aptitude.fail.index <- attobo2.index$id[attobo2.index$attribution == 1 & attobo2.index$valence == 0]
aptitude.fail <- pred.redo.nofeedback[, aptitude.fail.index]
aptitude.fail.redo.nofeedback.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.redo.nofeedback.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.09  0.04  0.03  0.08  0.18
## Mean in-sample error per participant (no-feedback model); should sit near zero
error.redo.nofeedback <- attobo2.out.pp[, grep("error[", colnames(attobo2.out.pp), fixed = TRUE)]

error <- unname(vapply(error.redo.nofeedback, mean, numeric(1)))
mean(error)
## [1] 1.40576e-05
plot(density(error))

Plotting the Graph

## estimated count for each participant (no-feedback model, Failure cells)

strategy.id <- as.numeric(unlist(which(attobo2$attribution == "Strategy" & attobo2$valence == "Fail")))
effort.id <- as.numeric(unlist(which(attobo2$attribution == "Effort"& attobo2$valence == "Fail")))
aptitude.id <- as.numeric(unlist(which(attobo2$attribution == "Aptitude"& attobo2$valence == "Fail")))

## Cell-mean posteriors: average the participant-level p draws per MCMC draw
pred.redo.nofeedback$strategy.sim <- apply(pred.redo.nofeedback[, strategy.id], 1, mean)
pred.redo.nofeedback$effort.sim <- apply(pred.redo.nofeedback[, effort.id], 1, mean)
pred.redo.nofeedback$aptitude.sim <- apply(pred.redo.nofeedback[, aptitude.id], 1, mean)

quantile(pred.redo.nofeedback$strategy.sim, probs = c(.025, .5, .975))
##       2.5%        50%      97.5% 
## 0.03533705 0.09878644 0.20276179
quantile(pred.redo.nofeedback$effort.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.2755875 0.4017565 0.5381963
quantile(pred.redo.nofeedback$aptitude.sim, probs = c(.025, .5, .975))
##       2.5%        50%      97.5% 
## 0.02527176 0.08157364 0.18315762

## BUG FIX: the ridge plot previously reused attobo2.posterior.coefs.long
## from the feedback model; rebuild it from this model's sims first.
attobo2.posterior.coefs <- pred.redo.nofeedback[, c("strategy.sim", "effort.sim", "aptitude.sim")]
names(attobo2.posterior.coefs) <- c("Strategy", "Effort", "Aptitude")
attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)

library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) + 
  stat_density_ridges(quantile_lines = TRUE, 
                      quantiles = c(0.025, 0.5, 0.975), 
                      alpha = 0.7)

Expectancy

Bayesian Model - Expectancy

## Data prep for the expectancy beta-regression model (no pseudo-observations).
## BUG FIX: c() on a factor no longer returns integer codes in R >= 4.1, so the
## original `valence == 2` (and the downstream `attribution == 3` filters)
## silently matched nothing; convert with as.integer() explicitly
## (levels are alphabetical: Fail = 1, Success = 2; Aptitude = 1, Effort = 2,
## Strategy = 3).
strategy <- ifelse(attobo2$attribution == "Strategy", 1, 0)
effort <- ifelse(attobo2$attribution == "Effort", 1, 0)

## Squeeze exact 0/1 off the boundary so the beta likelihood is defined
expectancy <- ifelse(attobo2$expectancy == 1, .999,
                     ifelse(attobo2$expectancy == 0, .001, attobo2$expectancy) )

valence <- as.integer(attobo2$valence)

valence <- ifelse(valence == 2, 1, 0)

attribution <- as.integer(attobo2$attribution)


attobo2.datjags <- list(strategy = strategy, effort = effort, N = nrow(attobo2), expectancy = expectancy, valence = valence)

Model Specification

## JAGS beta-regression model for expectancy: mean-precision
## parameterisation (alpha = phi*mu, beta = phi*(1 - mu)) with a logit link
## on the mean. Written as an R function only so R2jags can deparse it;
## BUGS semantics apply inside (comments stripped at deparse time).
attobo2.model <- function() {
  
  # Likelihood: expectancy is beta-distributed on (0, 1)
  for(i in 1:N){
  
    expectancy[i] ~ dbeta(alpha[i], beta[i])
    alpha[i] = phi * mu[i]
    beta[i] = phi * (1 - mu[i])
    logit(mu[i]) <- b[1] + b[2]*effort[i] + b[3]*strategy[i] + b[4]*valence[i] + b[5]*effort[i]*valence[i] + b[6]*strategy[i]*valence[i]
  }
  
  # priors: vague normals on the logit-scale coefficients
  
 for(i in 1:6){
  b[i] ~ dnorm(0, .0025)
  }
  
  # precision: gamma prior on the inverse, phi = 1/phiinv
    phiinv ~ dgamma(0.01,0.01)
   phi <- 1/phiinv 

  
  # calculations: in-sample residual on the mean scale
  for (i in 1:N){
    
    error[i] <- mu[i] - expectancy[i]
    
  }
  
}
## Nodes to monitor: coefficients, fitted means, residuals
attobo2.params <- c("b", "mu", "error")
## Over-dispersed starting values (phiinv initialises the precision component)
attobo2.inits1 <-  list( "b" = rep(-1,6), "phiinv" = 1)
attobo2.inits2 <- list( "b" = rep(0, 6) , "phiinv" =2 )
attobo2.inits3 <- list( "b" = rep(1, 6), "phiinv" = 1 )
attobo2.inits <- list(attobo2.inits1, attobo2.inits2,attobo2.inits3)
set.seed(1128)
attobo2.fit <- jags(data = attobo2.datjags, inits = attobo2.inits, parameters.to.save = attobo2.params, 
                n.chains = 3, n.iter = 30000, n.burnin = 10000, model.file = attobo2.model)

## NOTE(review): the first update() result is immediately overwritten by
## autojags(), and as.mcmc() converts the original fit rather than
## attobo2.fit.upd — confirm this is intentional.
attobo2.fit.upd <- update(attobo2.fit, n.iter =1000)
attobo2.fit.upd <- autojags(attobo2.fit)
attobo2.fit.mcmc <- as.mcmc(attobo2.fit)
save(attobo2.fit.mcmc, file = "attobo2.expectancy.mcmc.RData")

Convergence Diagnostics

## Trace/density plots for the first seven monitored nodes
## (presumably b[1]-b[6] plus deviance under coda's column ordering)
load("attobo2.expectancy.mcmc.RData")                                      
MCMCtrace(attobo2.fit.mcmc[, 1:7] , ind = TRUE, pdf = FALSE )

## Autocorrelation, one plot set per chain
autocorr.plot(attobo2.fit.mcmc[1][,1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[2][,1:7], ask = FALSE)

autocorr.plot(attobo2.fit.mcmc[3][,1:7], ask = FALSE)

## Participant index: 292 respondents, no pseudo-observations here
id <- 1:292

attobo2.index <- data.frame(attribution, valence, id)
## Flatten all chains into one draws-by-node data frame
attobo2.out <- as.data.frame(as.matrix(attobo2.fit.mcmc))

save(attobo2.out, file = "attobo2.expectancy.pp.RData")
load("attobo2.expectancy.pp.RData")

## Mean in-sample residual per participant; should sit near zero
error.expectancy <- attobo2.out[, grep("error[", colnames(attobo2.out), fixed = T)]

error <- as.vector(unlist(lapply(error.expectancy, mean)))
mean(error)
## [1] -0.000457553
plot(density(error))

## Posterior draws of the fitted expectancy means, ordered by participant.
## BUG FIX: the original grepped "p[" on attobo2.out.pp, which still holds the
## redo-nofeedback model's draws; the expectancy model monitors "mu" and its
## draws live in attobo2.out (as the later extraction in this file does).
pred.expectancy <- attobo2.out[, grep("mu[", colnames(attobo2.out), fixed = T)]
pred.expectancy <- pred.expectancy[, c(mixedsort(names(pred.expectancy)))]

## Cell-level expectancy summaries built from pred.expectancy.
## NOTE(review): verify that pred.expectancy actually holds the expectancy
## model's mu draws — upstream it is extracted with pattern "p[" from
## attobo2.out.pp (the redo-model draws), and the values reported below are
## identical to the redo-nofeedback reports. Confirm before citing them.
strategy.success.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 1) 

strategy.success.index <- strategy.success.index[, "id"]


strategy.success <- pred.expectancy[, strategy.success.index]

strategy.success.expectancy.report <- as.report(apply.mean(strategy.success))

strategy.fail.index <- attobo2.index %>%
  filter(attribution == 3 & valence == 0) 

strategy.fail.index <- strategy.fail.index[, "id"]

strategy.fail <- pred.expectancy[, strategy.fail.index]

strategy.fail.expectancy.report <- as.report(apply.mean(strategy.fail))
strategy.fail.expectancy.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.10  0.04  0.04  0.10  0.20
effort.success.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 1)

effort.success.index <- effort.success.index[, "id"]

effort.success <- pred.expectancy[, effort.success.index]

effort.success.expectancy.report <- as.report(apply.mean(effort.success))
effort.success.expectancy.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
effort.fail.index <- attobo2.index %>%
  filter(attribution == 2 & valence == 0) 

effort.fail.index <- effort.fail.index[, "id"]

effort.fail <- pred.expectancy[, effort.fail.index]

effort.fail.expectancy.report <- as.report(apply.mean(effort.fail))
effort.fail.expectancy.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.40  0.07  0.28  0.40  0.54
aptitude.success.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 1) 

aptitude.success.index <- aptitude.success.index[,"id"]

aptitude.success <- pred.expectancy[, aptitude.success.index]

aptitude.success.expectancy.report <- as.report(apply.mean(aptitude.success))
aptitude.success.expectancy.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.02  0.02  0.00  0.01  0.07
aptitude.fail.index <- attobo2.index %>%
  filter(attribution == 1 & valence == 0) 

aptitude.fail.index <- aptitude.fail.index[, "id"] 

aptitude.fail <- pred.expectancy[, aptitude.fail.index]

aptitude.fail.expectancy.report <- as.report(apply.mean(aptitude.fail))
aptitude.fail.expectancy.report
##  Mean    SD  2.5%   50% 97.5% 
##  0.09  0.04  0.03  0.08  0.18
## Posterior mean expectancy per participant, one vector per
## attribution x valence cell.
aptitude.fail.mean <- apply.mean(aptitude.fail)
aptitude.success.mean <- apply.mean(aptitude.success)
effort.fail.mean <- apply.mean(effort.fail)
effort.success.mean <- apply.mean(effort.success)
strategy.fail.mean <- apply.mean(strategy.fail)
strategy.success.mean <- apply.mean(strategy.success)

## Bind the six cells into columns pp1..pp6 (gather needs unique names)
attobo2.pp <- cbind(aptitude.fail.mean, aptitude.success.mean, effort.fail.mean,
                    effort.success.mean, strategy.fail.mean, strategy.success.mean)
colnames(attobo2.pp) <- paste0("pp", 1:6)

## Long format: key = cell id, value = posterior mean
attobo2.plot <- gather(as.data.frame(attobo2.pp))
attobo2.plot$key2 <- attobo2.plot$key

## Relabel cell ids with attribution / valence labels (car::recode syntax)
attobo2.plot$key <- recode(attobo2.plot$key, "'pp1' = 'Aptitude'; 'pp2' = 'Aptitude'; 'pp3' = 'Effort'; 'pp4' = 'Effort'; 'pp5' = 'Strategy'; 'pp6' = 'Strategy'")
attobo2.plot$key2 <- recode(attobo2.plot$key2, "'pp1' = 'Failure'; 'pp2' = 'Success'; 'pp3' = 'Failure'; 'pp4' = 'Success'; 'pp5' = 'Failure'; 'pp6' = 'Success'")
colnames(attobo2.plot) <- c("Attribution", "Expectancy", "Outcome Valence")

## Posterior mean and 95% interval per cell
attobo2.posterior.coefs.sum <- attobo2.plot %>%
  group_by(Attribution, `Outcome Valence`) %>%
  summarize(mean_coef = mean(`Expectancy`),
            lower_coef = quantile(`Expectancy`, probs = 0.025),
            upper_coef = quantile(`Expectancy`, probs = 0.975))

expectancy.summary <- attobo2.posterior.coefs.sum
## Fitted expectancy (mu) draws from the expectancy model, reordered
## numerically by participant.
pred.expectancy <- attobo2.out[, grep("mu[", colnames(attobo2.out), fixed = T)]

## estimated count for each participant
pred.expectancy <- pred.expectancy[, c(mixedsort(names(pred.expectancy)))]

## Failure cells: average the participant-level mu draws per MCMC draw
strategy.id <- as.numeric(unlist(which(attobo2$attribution == "Strategy"& attobo2$valence == "Fail")))
effort.id <- as.numeric(unlist(which(attobo2$attribution == "Effort" & attobo2$valence == "Fail")))
aptitude.id <- as.numeric(unlist(which(attobo2$attribution == "Aptitude" & attobo2$valence == "Fail")))

pred.expectancy$strategy.sim <- apply(pred.expectancy[, strategy.id], 1, mean)
pred.expectancy$effort.sim <- apply(pred.expectancy[, effort.id], 1, mean)
pred.expectancy$aptitude.sim <- apply(pred.expectancy[, aptitude.id], 1, mean)

quantile(pred.expectancy$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.5546821 0.6152940 0.6707751
quantile(pred.expectancy$effort.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.5836261 0.6389262 0.6918349
quantile(pred.expectancy$aptitude.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.3535172 0.4119981 0.4734945
## FIX: mean() has no 'probs' argument — it was silently swallowed by `...`
## and ignored — so call mean() without it.
mean(pred.expectancy$strategy.sim)
## [1] 0.6147642
mean(pred.expectancy$effort.sim)
## [1] 0.6387751
mean(pred.expectancy$aptitude.sim)
## [1] 0.4117914

## Success cells — NOTE: these OVERWRITE the Failure-cell sims computed above,
## so any later use of *.sim sees the Success values.
strategy.id <- as.numeric(unlist(which(attobo2$attribution == "Strategy"& attobo2$valence == "Success")))
effort.id <- as.numeric(unlist(which(attobo2$attribution == "Effort" & attobo2$valence == "Success")))
aptitude.id <- as.numeric(unlist(which(attobo2$attribution == "Aptitude" & attobo2$valence == "Success")))

pred.expectancy$strategy.sim <- apply(pred.expectancy[, strategy.id], 1, mean)
pred.expectancy$effort.sim <- apply(pred.expectancy[, effort.id], 1, mean)
pred.expectancy$aptitude.sim <- apply(pred.expectancy[, aptitude.id], 1, mean)

quantile(pred.expectancy$strategy.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.8164989 0.8526081 0.8839656
quantile(pred.expectancy$effort.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.8451126 0.8767756 0.9049234
quantile(pred.expectancy$aptitude.sim, probs = c(.025, .5, .975))
##      2.5%       50%     97.5% 
## 0.7936045 0.8340198 0.8720223
mean(pred.expectancy$strategy.sim)
## [1] 0.8521303
mean(pred.expectancy$effort.sim)
## [1] 0.8762882
mean(pred.expectancy$aptitude.sim)
## [1] 0.8336659

Expected Counts for Each Attribution

## Ridge plot of the cell-mean expectancy posteriors.
## Note: strategy.sim/effort.sim/aptitude.sim were last assigned from the
## Success cells just above, so that is what this plot shows.
attobo2.posterior.coefs <- setNames(
  pred.expectancy[, c("strategy.sim", "effort.sim", "aptitude.sim")],
  c("Strategy", "Effort", "Aptitude")
)
attobo2.posterior.coefs.long <- gather(attobo2.posterior.coefs)
#head(attobo2.posterior.coefs.long)
library("ggridges")
ggplot(data = attobo2.posterior.coefs.long, aes(x = value, y = key)) + 
  stat_density_ridges(quantile_lines = TRUE, 
                      quantiles = c(0.025, 0.5, 0.975), 
                      alpha = 0.7)

Consolidating Reports

## Consolidate every cell-level posterior report into one named list and save.
## mget() preserves order and uses the variable names as list names, exactly
## as the hand-written list(... = ...) did.
report.names <- c(
  "strategy.success.method.report", "strategy.fail.method.report",
  "effort.success.method.report", "effort.fail.method.report",
  "aptitude.success.method.report", "aptitude.fail.method.report",

  "strategy.success.bruteforce.report", "strategy.fail.bruteforce.report",
  "effort.success.bruteforce.report", "effort.fail.bruteforce.report",
  "aptitude.success.bruteforce.report", "aptitude.fail.bruteforce.report",

  "strategy.success.redo.report", "strategy.fail.redo.report",
  "effort.success.redo.report", "effort.fail.redo.report",
  "aptitude.success.redo.report", "aptitude.fail.redo.report",

  "strategy.success.expectancy.report", "strategy.fail.expectancy.report",
  "effort.success.expectancy.report", "effort.fail.expectancy.report",
  "aptitude.success.expectancy.report", "aptitude.fail.expectancy.report"
)

attobo2.posteriors <- mget(report.names)

attobo2.posteriors

save(attobo2.posteriors, file = "attobo2.posteriors.RData")